In [1]:
from pearce.emulator import OriginalRecipe, ExtraCrispy
from pearce.mocks import cat_dict
import numpy as np
from os import path
In [2]:
import matplotlib
#matplotlib.use('Agg')
from matplotlib import pyplot as plt
%matplotlib inline
import seaborn as sns
sns.set()
In [3]:
training_dir = '/u/ki/swmclau2/des/PearceLHC_wp_z_corrab_emulator/'
em_method = 'gp'
split_method = 'random'
In [4]:
a = 1.0
z = 1./a-1.0
In [5]:
fixed_params = {'z':z}#, 'r':0.18477483}
In [6]:
emu = OriginalRecipe(training_dir, method = em_method, fixed_params=fixed_params)
In [7]:
emu.scale_bin_centers
Out[7]:
In [8]:
emu._ordered_params
Out[8]:
In [9]:
emu._get_initial_guess(None)
Out[9]:
In [10]:
def nll(p):
    # Negative GP log-likelihood as a function of the assembly-bias kernel
    # hyperparameters only (the entries selected by ab_param_idxs, built below).
    emu._emulator.kernel[ab_param_idxs] = p
    ll = emu._emulator.lnlikelihood(emu.y, quiet=False)
    # The scipy optimizer doesn't play well with infinities.
    return -ll if np.isfinite(ll) else 1e25

# And the gradient of the objective function.
def grad_nll(p):
    # Gradient of the negative log-likelihood, restricted to the same
    # assembly-bias hyperparameters.
    emu._emulator.kernel[ab_param_idxs] = p
    gll = emu._emulator.grad_lnlikelihood(emu.y, quiet=True)
    return -gll[ab_param_idxs]
In [11]:
ab_param_names = ['mean_occupation_centrals_assembias_param1',
                  #'mean_occupation_centrals_assembias_slope1',
                  #'mean_occupation_centrals_assembias_split1',
                  'mean_occupation_satellites_assembias_param1']
                  #'mean_occupation_satellites_assembias_slope1',
                  #'mean_occupation_satellites_assembias_split1']
In [12]:
# Map each assembly-bias parameter name to its position in the kernel
# hyperparameter vector (offset by one, presumably to skip the amplitude term).
ab_param_idxs = []
for apn in ab_param_names:
    ab_param_idxs.append(list(emu._ordered_params.keys()).index(apn) + 1)
ab_param_idxs = np.array(ab_param_idxs)
In [13]:
p0 = np.ones_like(ab_param_idxs) #emu._emulator.kernel.vector[ab_param_idxs]
In [14]:
p0
Out[14]:
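The infinity guard in nll is there because it is meant for the scipy optimizer; as a rough sketch (not part of this notebook's run), nll and grad_nll can be handed to scipy.optimize.minimize to get a maximum-likelihood estimate of these hyperparameters before sampling. The bounds below simply mirror the flat prior used further down and are an assumption:

from scipy.optimize import minimize

# Sketch: maximum-likelihood fit of the assembly-bias hyperparameters,
# starting from p0 and bounded to match the flat prior on [-2, 2].
result = minimize(nll, p0.astype(float), jac=grad_nll,
                  method='L-BFGS-B', bounds=[(-2, 2)] * len(ab_param_idxs))
print(result.x)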
In [15]:
import emcee as mc
In [31]:
def lnprior(theta, *args):
    # Flat prior on each hyperparameter over [-2, 2].
    return -np.inf if np.any(np.logical_or(theta < -2, theta > 2)) else 0

def lnprob(theta, *args):
    # Log-posterior: flat prior plus the GP log-likelihood (minus nll).
    lp = lnprior(theta, *args)
    if not np.isfinite(lp):
        return -np.inf
    return lp - nll(theta, *args)
In [56]:
nwalkers = 100
nsteps = 100
nburn = 0
num_params = len(p0)
# Initial walker positions drawn from N(0, (2/3)^2), so they start well
# inside the flat prior on [-2, 2].
pos0 = np.random.randn(nwalkers, num_params)*2.0/3
ncores = 1
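One detail worth noting: lnprior returns -inf outside [-2, 2], and a Gaussian draw with scale 2/3 can occasionally land a walker beyond that. A minimal guard, not in the original run, would be to clip the starting positions:

# Hypothetical guard: keep every starting position strictly inside the prior.
pos0 = np.clip(pos0, -1.99, 1.99)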
In [57]:
_x, _y, _yerr = emu.x, emu.y, emu.yerr
In [58]:
n = 200
emu.x = _x[:n*emu.scale_bin_centers.shape[0], :]
emu.y = _y[:n*emu.scale_bin_centers.shape[0]]
emu.yerr = _yerr[:n*emu.scale_bin_centers.shape[0]]
emu._build_gp(emu._get_initial_guess(None))
In [59]:
sampler = mc.EnsembleSampler(nwalkers, num_params, lnprob, threads=ncores)
In [60]:
from time import time
In [61]:
t0 = time()
sampler.run_mcmc(pos0, nsteps)
print(time() - t0)
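A quick sanity check on the run (not in the original cells): emcee reports each walker's acceptance fraction, and a mean far from the commonly quoted 0.2-0.5 range suggests the proposal scale or prior needs attention.

# Mean acceptance fraction across the walkers.
print(np.mean(sampler.acceptance_fraction))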
In [23]:
# Flatten walkers and post-burn-in steps into one (nsamples, num_params) array.
chain = sampler.chain[:, nburn:, :].reshape((-1, num_params))
In [55]:
chain
Out[55]:
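To inspect the flattened samples, a corner plot over the two assembly-bias hyperparameters is the natural next step. This sketch assumes the corner package is installed; it is not part of the original notebook:

import corner

# Marginal and joint posteriors of the two assembly-bias kernel hyperparameters.
fig = corner.corner(chain, labels=ab_param_names)
plt.show()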
In [ ]: